# loading packages
# `require()` returns FALSE (with a warning) rather than erroring when a
# package is missing. The original only installed missing packages without
# loading them, so a fresh machine would install tidyverse/lmerTest and then
# fail on first use; load after installing.
if (!require(tidyverse)) {
  install.packages('tidyverse')
  library(tidyverse)
}
if (!require(lmerTest)) {
  install.packages('lmerTest')
  library(lmerTest)
}
library(specr)
library(mediation)
library(dplyr)
library(pander)
# defining palette
# Two condition colors from the Zissou1 palette plus black for a third
# condition; `palette` adds grey50 as a neutral fourth color.
palette_cond = wesanderson::wes_palette("Zissou1", n = 2, type = "continuous")
palette_cond = c(palette_cond[1:2], "black")
palette = c(palette_cond[1:3], "grey50")
# Data was cleaned using the `../data_cleaning.Rmd` script.
data_pilot1 = read.csv("../../covid19_study1_pilot/covid19_study1_pilot_clean_long.csv", stringsAsFactors = FALSE)
data_pilot2 = read.csv("../../covid19_study1_pilot2/covid19_study1_pilot2_clean_long.csv", stringsAsFactors = FALSE)
# Combine the two pilots. Reuse the frames loaded above rather than re-reading
# the same CSVs from disk (the original read each file twice); prefix SIDs
# with the study name so participant IDs are unique across pilots.
data_combined = data_pilot1 %>%
  bind_rows(data_pilot2) %>%
  mutate(SID = sprintf("%s_%s", study, SID))
The overarching goal of this project is to determine the degree to which several message framing interventions might enhance message effectiveness and intentions, norms, and beliefs related to social distancing. Specifically, here we test the effect of humorous framing of health messages promoting social distancing behavior. We use two types of humorous framings:
Participants were randomly assigned to either a message framing intervention group (using encouraging or mocking humor), a control message group, or a group that saw no messages. Each participant in the intervention and message control groups saw a series of 5 messages about social distancing related to COVID-19, randomly sampled from a pool of 15 messages for pilot 2, which were previously normed for argument strength (M = 4.16, SD = 0.14, possible range = 1-5). Each message was created to look like an instagram post that included a visual message about COVID-19 accompanied by a “post” about the message. The message control condition contained this stem only, whereas the humorous-framing conditions contained additional text framing the messages humorously (i.e., adding a joke, using the stem as premise). Participants then completed various outcome and individual differences measures.
# Load the pretest 1 Qualtrics export.
df <- read.csv('pretest1/COVID-19 humorous messages testing_March 26, 2020_20.08.csv')
### Extract variable name data
# Qualtrics exports put two extra header rows (question text / import ids)
# above the responses; stash them and drop them from the data.
varnames <- df[1:2, ]
df <- df[3:nrow(df), ]
### Filter based on progress
df$Progress <- as.numeric(as.character(df$Progress))
# which() drops rows with NA Progress; plain logical indexing would keep
# them as all-NA rows.
df <- df[which(df$Progress >= 90), ]
### Recoding values
# Columns 11:198 hold the funniness/mocking ratings as text labels; map
# them onto 1-5 integer scales. (Loop variable renamed from `c`, which
# shadowed base::c.)
recoding_cols <- colnames(df)[11:198]
for (col in recoding_cols) {
  df[, col] <- recode(df[, col],
                      `Not at all funny` = 1, `A bit funny` = 2,
                      `Somewhat funny` = 3, `Very funny` = 4,
                      `Extremely funny` = 5,
                      `Not at all mocking` = 1, `A bit mocking` = 2,
                      `Somewhat mocking` = 3, `Very mocking` = 4,
                      `Extremely mocking` = 5)
}
### Extract encouraging (non-mocking) messages
# Drop the mocking-message columns, flag participants who gave the expected
# response on all six embedded attention-check items, keep only those
# participants, then remove the check items themselves.
df_enc <- df %>%
dplyr::select(!matches("^moc_")) %>%
# TRUE only when every check item matches its expected value; NA if any
# check item is missing.
mutate(attention_check = enc_1.1_11 == 3 & enc_1.2_11 == 1 &
enc_3.1_11 == 5 & enc_3.2_11 == 2 &
enc_5.1_10 == 1 & enc_5.2_10 == 3) %>% # attention checks
filter(attention_check == TRUE) %>%
dplyr::select(-enc_1.1_11, -enc_1.2_11, -enc_3.1_11, -enc_3.2_11, -enc_5.1_10, -enc_5.2_10) # removing attention check variables
### convert from wide to long form
df_enc <- df_enc %>%
# keep only the message-rating columns
dplyr::select(matches("enc_")) %>%
# assign sequential subject IDs (S01, S02, ...)
mutate(SID = sprintf("S%02d", seq.int(nrow(.)))) %>%
gather("item", "score", -SID) %>%
# split column names like enc_3.1_07 into block (3), question type (1),
# and message number (07)
extract(item, c("block", "question_type", "msg_number"), "enc_([1-5]).([1-2])_([0-9]+)", remove = TRUE) %>%
mutate(item = sprintf("enc_%s_%s", block, msg_number)) %>%
# question type 1 = funniness rating, 2 = mocking rating
mutate(question_type = ifelse(question_type == 1, "funniness",
ifelse(question_type == 2, "mocking", "other"))) %>%
dplyr::select(SID, item, question_type, score) %>%
# one row per subject x item, with funniness and mocking as columns
spread(question_type, score)
### load item texts and merge with data frame
item_texts <- read.csv('pretest1/encouraging_stim.csv') %>%
dplyr::select(item, text)
df_enc <- df_enc %>%
left_join(., item_texts, by = "item")We ran a short pre-test to test our intended manipulations of funniness and mockingness in the non-mocking humorous texts we designed. We recuited 32 participants to rate how funny (1 = Not at all funny, 5 = Extremely funny) and mocking (1 = Not at all mocking, 5 = Extremely mocking) these humorous texts were.
# Loading data and formatting it to make it analyzable
df <- read.csv('pretest2/COVID-19 humorous messages testing (mocking only)_March 31, 2020_18.26.csv')
### Extract variable name data
# Qualtrics exports put two extra header rows (question text / import ids)
# above the responses; stash them and drop them from the data.
varnames <- df[1:2, ]
df <- df[3:nrow(df), ]
### Filter based on progress
df$Progress <- as.numeric(as.character(df$Progress))
# which() drops rows with NA Progress; plain logical indexing would keep
# them as all-NA rows.
df <- df[which(df$Progress >= 90), ]
### Recoding values
# Columns 11:64 hold the funniness/mocking ratings as text labels; map
# them onto 1-5 integer scales. (Loop variable renamed from `c`, which
# shadowed base::c.)
recoding_cols <- colnames(df)[11:64]
for (col in recoding_cols) {
  df[, col] <- recode(df[, col],
                      `Not at all funny` = 1, `A bit funny` = 2,
                      `Somewhat funny` = 3, `Very funny` = 4,
                      `Extremely funny` = 5,
                      `Not at all mocking` = 1, `A bit mocking` = 2,
                      `Somewhat mocking` = 3, `Very mocking` = 4,
                      `Extremely mocking` = 5)
}
### Removing unwanted columns
df <- df[, c("ResponseId", recoding_cols)]
### Filtering based on attention checks
# TRUE only when all four embedded check items match their expected values;
# NA if any check item is missing.
df$attention_check <- df$att_check_funniness == 4 & df$att_check_mocking == 3 &
  df$att_check_funniness.1 == 1 & df$att_check_mocking.1 == 4
# which() drops rows where the flag is NA; the original `df[df$attention_check,]`
# would have kept such rows as all-NA rows.
df <- df[which(df$attention_check), ]
### Removing attention check variables
df[c("att_check_funniness", "att_check_funniness.1",
     "att_check_mocking", "att_check_mocking.1",
     "attention_check")] <- NULL
### convert from wide to long form
# rating columns: one funniness and one mocking column per image/joke pair
list_items <- colnames(df)[2:51]
df <- df %>%
gather(key = "item", value = "value", list_items) %>%
# column names encode image, joke number, and rating type
separate(item, c("image", "joke_number", "rating_type")) %>%
unite("item", c("image", "joke_number")) %>%
# one row per response x item, with funniness/mocking as columns
spread(rating_type, value)
### extracting list of joke texts
# the first Qualtrics header row holds the full question text per column
varnames <- varnames[1,list_items]
varnames <- varnames %>%
gather(key = "item", value = "text") %>%
separate(item, c("image", "joke_number", "rating_type")) %>%
unite("item", c("image", "joke_number"))
# the joke text is duplicated across rating types; keep one copy per item
varnames <- varnames[varnames$rating_type == "funniness",]
varnames <- varnames %>%
# strip the boilerplate question stem, keeping only the joke text
separate(text, c("remove_this", "joke_text"), sep = " - Please indicate how funny you find the following messages. - ") %>%
# read.csv prefixes numeric column names with "X"; strip that prefix
separate(item, c("remove_this2", "item"), sep = "X")
varnames$remove_this <- NULL
varnames$remove_this2 <- NULL
varnames$rating_type <- NULL
### merge joke texts with the data
df <- df %>%
separate(item, c("remove_this", "item"), sep = "X") %>%
dplyr::select(-remove_this)
df <- merge(df, varnames, by = "item")We ran a short pre-test to test our intended manipulations of funniness and mockingness in the mocking humorous texts we designed. We recuited 37 participants to rate how funny (1 = Not at all funny, 5 = Extremely funny) and mocking (1 = Not at all mocking, 5 = Extremely mocking) these humorous texts were.
# Item-level means of funniness and mocking, merged with the joke texts.
df_items <- df %>%
group_by(item) %>%
summarise(funniness = mean(funniness), mocking = mean(mocking))
df_items <- merge(df_items, varnames, by = "item")
# Display table, sorted from funniest to least funny.
disp_items <- df_items %>%
dplyr::select(item, joke_text, funniness, mocking) %>%
arrange(desc(funniness))
pander(disp_items, style = "grid")| item | joke_text | funniness | mocking |
|---|---|---|---|
| 32_1 | It’s really pretty simple. Stay at home as much as possible, and meet with co-workers online. That is right, you can now mute your boss, they won’t even know. | 3.054 | 1.919 |
| 09_2 | It’s not just about you getting infected, it’s about you infecting others #covid19. So unless you’re willing to trade your bathing suit for a hazmat suit this spring break, stay home. | 2.703 | 2.054 |
| 20_1 | We are staying home today to make a difference in our fight against the coronavirus. No one likes that one guy from work who is DYING to be at the office. No Gary, I am NOT interested in martini Wednesdays. | 2.595 | 2.595 |
| 24_1 | Avoiding even one social contact can have a huge impact on limiting the spread of #covid19. However some people will be irresponsible and stupid enough to attend social gatherings. Avoid them, avoid them like you avoid people with clipboard on streets. | 2.568 | 2.568 |
| 31_1 | Staying home helps stop the spread of #coronavirus. And yet some people will insist on NOT working from home. Why? Cause you miss passing by co-workers on the way to the bathroom? | 2.568 | 2.459 |
| 31_2 | Staying home helps stop the spread of #coronavirus. There will be more spring breaks, there will be more Coachellas. Trust me, there will be more Coachellas. | 2.486 | 1.973 |
| 07_2 | Avoid large gatherings and stay at home as much as possible #covid19. People bragging about avoiding social distancing online are the same people who would crowd left lanes on highways. | 2.459 | 2.405 |
| 07_1 | Avoid large gatherings, stay at home as much as possible #covid19 And don’t worry, you’re not missing out. This year at Coachella the audience will be a hologram. | 2.432 | 1.811 |
| 23_1 | Avoiding crowds and staying home as much as possible helps protect the vulnerable. Don’t travel and communicate the damn disease like a coronavirus traveling salesman. | 2.405 | 2.189 |
| 09_1 | It’s not just about you getting infected, it’s about you infecting others #covid19. Hanging out with friends right now is the moral equivalent of sneezing at an old lady and saying “peace out” | 2.324 | 2.162 |
| 15_2 | Social distancing is critical for reducing exposure to #covid19. Unless you are in a hazmat suit, you are definitely speading some virus unwittingly. And I am guessing these kids are not wearing a hazmat suit on their spring break. | 2.324 | 2.73 |
| 10_2 | Try to stay home and not be a contagion vector. Unless you want to infect all those at higher risk than you, who might actually die from #covid19, just because you couldn’t scratch the itch of getting a flaming Dr. Pepper shot at 2 in the afternoon on a Tuesday. | 2.27 | 2.676 |
| 25_1 | Thinking of going out for a cheeky last pint? Don’t. Uh-oh you are leaving the door. Darnit you can trasmit the disease! Don’t do it! And welp you have now shut the door behind you. We are all going to die. | 2.27 | 2.405 |
| 06_1 | Staying home protects our community by stopping the spread of #covid19. Not staying at home, and I am trying to put it delicately, does NOT stop the spread of virus, instead helps it. So please, put that pair of floral boxers down, leave the store and get back home. | 2.243 | 2.486 |
| 12_2 | Coronavirus is far more dangerous than the common flu. If you think this won’t affect you think again. Then if you still think this won’t affect you, think again again. | 2.216 | 1.919 |
| 08_1 | Practice social distancing and help slow the spread of coronavirus! Or you know, whatever, go infect everyone in crowded places like a sociopath. | 2.189 | 2.459 |
| 15_1 | Social distancing is critical for reducing exposure to #covid19. You must stay home to stop hundreds of others from being infected! Not staying home is the moral equivalent of coughing on an old lady’s face and then shrugging. | 2.108 | 2.054 |
| 24_2 | Avoiding even one social contact can have a huge impact on limiting the spread of #covid19. But yeah, going pub crawling with your friends is WAY more important of course. | 2.108 | 2.865 |
| 10_1 | Think of all the people who might get infected because of you if you venture out. Instead, if you really want to “go viral” stay home and make a YouTube video. | 2.081 | 1.73 |
| 17_2 | We all have a role to play in stopping the spread of #coronavirus, so stay home if you can. Too many have already died after getting infected from some special people trying to act too smart and trivializing the virus. These are the same special people who would wear headphones while jaywalking. | 2.081 | 2.676 |
| 17_1 | We all have a role to play in stopping the spread of #coronavirus, so stay home if you can. At this point, impulse adopting a DOG would be smarter than going out. | 2.054 | 2 |
| 32_2 | It’s really pretty simple. Stay at home as much as possible. Simple enough, right? Well, tell that to the people who are finding a global pandemic to be just convenient enough to make spring break plans. |  | 2.459 |
| 18_1 | Here is a message from doctors and nurses fighting on the front lines. Let’s stay home to support them in this fight against #coronavirus. The people who will still go out to crowded areas after reading this plea from doctors and nurses might finally answer the question: is it possible to be more reprehensible than an internet troll? | 1.946 | 2.405 |
| 08_2 | Practice social distancing and help slow the spread of coronavirus! hose who think they are too good for social distancing are the same people who don’t tip waiters at restaurants. | 1.892 | 2.622 |
| 12_1 | Coronavirus is far more dangerous than the common flu. Whoever told you that “it is just 0.5% it is not gonna kill us”, well even if it doesn’t kill them, it will kill 480,000 people just in the US. Whoever that jerk is, doesn’t deserve your attention anymore. Admit it, their social media was annoying to begin with. | 1.892 | 2.351 |
# For each image, keep the joke variant with the highest mean funniness.
# NOTE(review): equality against max() keeps both variants when they tie,
# the trailing "#%>%" is a leftover commented-out pipe step, and the result
# is still grouped by image — confirm downstream code expects that.
df_final_set <- df_items %>%
separate(item, c("image", "joke_number"), sep = "_") %>%
mutate(image = as.numeric(image)) %>%
mutate(joke_number = as.numeric(joke_number)) %>%
group_by(image) %>%
filter(funniness == max(funniness)) #%>%
# Display table of the winning variants, sorted by funniness.
disp_items <- df_final_set %>%
dplyr::select(image, joke_text, funniness, mocking) %>%
arrange(desc(funniness))
pander(disp_items, style = "grid")| image | joke_text | funniness | mocking |
|---|---|---|---|
| 32 | It’s really pretty simple. Stay at home as much as possible, and meet with co-workers online. That is right, you can now mute your boss, they won’t even know. | 3.054 | 1.919 |
| 9 | It’s not just about you getting infected, it’s about you infecting others #covid19. So unless you’re willing to trade your bathing suit for a hazmat suit this spring break, stay home. | 2.703 | 2.054 |
| 20 | We are staying home today to make a difference in our fight against the coronavirus. No one likes that one guy from work who is DYING to be at the office. No Gary, I am NOT interested in martini Wednesdays. | 2.595 | 2.595 |
| 24 | Avoiding even one social contact can have a huge impact on limiting the spread of #covid19. However some people will be irresponsible and stupid enough to attend social gatherings. Avoid them, avoid them like you avoid people with clipboard on streets. | 2.568 | 2.568 |
| 31 | Staying home helps stop the spread of #coronavirus. And yet some people will insist on NOT working from home. Why? Cause you miss passing by co-workers on the way to the bathroom? | 2.568 | 2.459 |
| 7 | Avoid large gatherings and stay at home as much as possible #covid19. People bragging about avoiding social distancing online are the same people who would crowd left lanes on highways. | 2.459 | 2.405 |
| 23 | Avoiding crowds and staying home as much as possible helps protect the vulnerable. Don’t travel and communicate the damn disease like a coronavirus traveling salesman. | 2.405 | 2.189 |
| 15 | Social distancing is critical for reducing exposure to #covid19. Unless you are in a hazmat suit, you are definitely speading some virus unwittingly. And I am guessing these kids are not wearing a hazmat suit on their spring break. | 2.324 | 2.73 |
| 10 | Try to stay home and not be a contagion vector. Unless you want to infect all those at higher risk than you, who might actually die from #covid19, just because you couldn’t scratch the itch of getting a flaming Dr. Pepper shot at 2 in the afternoon on a Tuesday. | 2.27 | 2.676 |
| 25 | Thinking of going out for a cheeky last pint? Don’t. Uh-oh you are leaving the door. Darnit you can trasmit the disease! Don’t do it! And welp you have now shut the door behind you. We are all going to die. | 2.27 | 2.405 |
| 6 | Staying home protects our community by stopping the spread of #covid19. Not staying at home, and I am trying to put it delicately, does NOT stop the spread of virus, instead helps it. So please, put that pair of floral boxers down, leave the store and get back home. | 2.243 | 2.486 |
| 12 | Coronavirus is far more dangerous than the common flu. If you think this won’t affect you think again. Then if you still think this won’t affect you, think again again. | 2.216 | 1.919 |
| 8 | Practice social distancing and help slow the spread of coronavirus! Or you know, whatever, go infect everyone in crowded places like a sociopath. | 2.189 | 2.459 |
| 17 | We all have a role to play in stopping the spread of #coronavirus, so stay home if you can. Too many have already died after getting infected from some special people trying to act too smart and trivializing the virus. These are the same special people who would wear headphones while jaywalking. | 2.081 | 2.676 |
| 18 | Here is a message from doctors and nurses fighting on the front lines. Let’s stay home to support them in this fight against #coronavirus. The people who will still go out to crowded areas after reading this plea from doctors and nurses might finally answer the question: is it possible to be more reprehensible than an internet troll? | 1.946 | 2.405 |
Here, we test the effect of “encouraging” humor versus message control, which were non-humorous. Encouraging humor refers to humorous framing of health messages such that these messages were not overtly mocking any person or group of people.
## tidy data for analysis
# This section analyzes pilot 1 only.
data = data_pilot1
# Message-level ratings: keep the two conditions of interest, reverse-score
# items cognition_2/4/6 on their 1-5 scale, and compute a favorability
# difference score (positive minus negative message ratings).
messages = data %>%
filter(condition %in% c("message control", "encouraging")) %>%
filter(grepl("msg", survey_name)) %>%
mutate(value = as.numeric(value),
value = ifelse(item == "cognition_2", abs(6 - value), value),
value = ifelse(item == "cognition_4", abs(6 - value), value),
value = ifelse(item == "cognition_6", abs(6 - value), value)) %>%
# keep only the trailing part of the item name (e.g., the message number)
extract(item, "item", "msg_.*_(.*)") %>%
spread(survey_name, value) %>%
mutate(msg_favorability = msg_positive - msg_negative) %>%
dplyr::select(-msg_negative, -msg_positive) %>%
gather(survey_name, value, contains("msg")) %>%
mutate(item = sprintf("%s_%s", survey_name, item))
# Person-level DVs and covariates, stacked with the message ratings above.
data_tidy = data %>%
filter(condition %in% c("message control", "encouraging")) %>%
mutate(condition = str_replace(condition, "-paired|-unpaired", "")) %>%
filter(grepl("cognition|intentions|norms_close|norms_town|beliefs|beliefs|politics_party|politics_conserv|^age$|gender", survey_name)) %>%
mutate(value = as.numeric(value)) %>%
bind_rows(messages)
# Demographic / control variables, one row per participant.
control_vars = data %>%
filter(grepl("state|gender|^age$", survey_name)) %>%
dplyr::select(condition, SID, survey_name, value) %>%
unique() %>%
spread(survey_name, value) %>%
mutate(state = as.factor(state),
gender = recode(gender, "1" = "male", "2" = "female", "3" = "other", "4" = "prefer not to say"),
# mean-center age (no scaling)
age = scale(as.numeric(age), center = TRUE, scale = FALSE))
# Per-person means for each survey over the selected items.
# NOTE(review): two code chunks were fused onto one line below during
# extraction — the summarize() ends one chunk and the condition-N count
# starts the next.
data_person = data_tidy %>%
filter(grepl("msg|cognition|beliefs|intentions1_2|intentions1_4|intentions1_6|intentions1_10|norms_close1_2|norms_close1_4|norms_close1_6|norms_close1_10|norms_town1_2|norms_town1_4|norms_town1_6|norms_town1_10|politics_party|politics_conserv", item)) %>%
group_by(condition, SID, survey_name) %>%
summarize(value = mean(value, na.rm = TRUE))data_person %>%
dplyr::select(SID, condition) %>%
unique() %>%
group_by(condition) %>%
summarise(n = n())In this section, we summarize condition effects on message ratings and other subject-level DVs and covariates.
# Plot condition means with bootstrapped 95% CIs for all surveys whose name
# matches the regex `survey`.
#
# Args:
#   data: long-format data with survey_name, item, value, condition columns.
#   survey: regular expression matched against survey_name.
#   item: if TRUE (default), one point-range per item; if FALSE, one per survey.
#   palette: colors for scale_color_manual(); defaults to the global
#     `palette` object. (The original default `palette = palette` was a
#     self-referential promise and errored whenever the argument was omitted.)
plot_cond = function(data, survey, item = TRUE, palette = NULL) {
  if (is.null(palette)) {
    palette <- get("palette", envir = globalenv())
  }
  # The original's two branches were identical except for the x aesthetic.
  x_var <- if (item == FALSE) "survey_name" else "item"
  data %>%
    filter(grepl(!!(survey), survey_name)) %>%
    mutate(value = as.numeric(value)) %>%
    ggplot(aes(.data[[x_var]], value, color = condition)) +
    stat_summary(fun.data = "mean_cl_boot", geom = "pointrange", position = position_dodge(width = .5)) +
    scale_color_manual(values = palette) +
    labs(x = "", y = "value\n") +
    theme_minimal() +
    theme(axis.text.x = element_text(angle = 45, hjust = 1),
          legend.position = "top")
}A summary of condition effects on message level ratings for each of the 15 messages tested in this study.
A summary of condition effects on message ratings and other DVs/covariates at person level.
A summary of condition effects on each survey item. ###### need for cognition
A summary of condition effects on each survey, aggregated across survey items. ###### intentions, beliefs, cognition
In this section, we summarize the effect of messages on message level ratings. These summaries are presented as: 1) collapsed over conditions, and 2) split by condition.
# Plot message-level means with bootstrapped 95% CIs, faceted by survey,
# with per-survey grand means drawn as dotted reference lines.
#
# Args:
#   data: long-format data with survey_name, message, value (and condition).
#   survey: regular expression matched against survey_name.
#   palette: colors for scale_color_manual(). Defaults to the global
#     palette_cond, which the original hard-coded in both branches while
#     leaving this parameter unused; it is now actually honored.
#   condition: if TRUE, color the point-ranges by condition.
plot_compare = function(data, survey = ".*", palette = palette_cond, condition = FALSE) {
  # Per-survey grand means for the reference lines.
  rating_means = data %>%
    filter(grepl(!!(survey), survey_name)) %>%
    group_by(survey_name) %>%
    summarize(mean = mean(value))
  # The original's two branches differed only in whether color was mapped.
  mapping = if (condition == TRUE) {
    aes(message, value, color = condition)
  } else {
    aes(message, value)
  }
  data %>%
    filter(grepl(!!(survey), survey_name)) %>%
    ggplot(mapping) +
    stat_summary(fun.data = "mean_cl_boot") +
    coord_flip() +
    geom_hline(data = rating_means, aes(yintercept = mean), linetype = "dotted") +
    facet_grid(~survey_name) +
    labs(x = "message\n", y = "\nvalue") +
    scale_color_manual(values = palette) +
    theme_minimal() +
    theme(legend.position = "top")
}
# Message-level comparison data: drop familiarity ratings and pull the
# two-digit message number out of the item name.
data_comp = messages %>%
  filter(survey_name != "msg_familiarity") %>%
  extract(item, "message", "msg_.*_([0-9]{2})", remove = FALSE)
NOTE: These plots are substantively the same as the ones presented in “visualize condition effects > message-level” sub-section. These plots are presented here for ease of comparison with the message effects collapsed across conditions.
# Person-level DVs/covariates, z-scored within survey, widened to one
# column per survey for joining into the model data below.
dvs_covs = data_person %>%
filter(grepl("intentions|cognition|norms|beliefs", survey_name)) %>%
group_by(survey_name) %>%
mutate(value = scale(value)) %>% #scale within survey
spread(survey_name, value)
# Trial-level modeling data: message ratings scaled within survey, joined
# with the person-level scores; condition releveled so "message control"
# is the reference level.
data_mod = messages %>%
group_by(SID, survey_name) %>%
extract(item, "message", "msg_.*_([0-9]{2})", remove = FALSE) %>%
mutate(trial = row_number()) %>%
dplyr::select(-item) %>%
group_by(survey_name) %>%
mutate(value = scale(value)) %>% #scale within survey
spread(survey_name, value) %>%
left_join(., dvs_covs) %>%
mutate(condition = factor(condition, levels = c("message control", "encouraging", "mocking")),
SID = as.factor(SID)) %>%
ungroup() %>%
dplyr::select(-msg_familiarity)
# Person-level modeling data: average each message rating across messages
# per participant, then z-score across people.
# NOTE(review): survey_name appears twice in the group_by below — harmless,
# but likely a typo.
data_mod_person = messages %>%
group_by(SID, survey_name) %>%
extract(item, "message", "msg_.*_([0-9]{2})", remove = FALSE) %>%
mutate(trial = row_number()) %>%
dplyr::select(-item) %>%
group_by(study, condition, SID, survey_name, survey_name) %>%
summarize(value = mean(value, na.rm = TRUE)) %>% # take the mean across messages
group_by(survey_name) %>%
mutate(value = scale(value)) %>% # scale across people
spread(survey_name, value) %>%
left_join(., dvs_covs)Models = lmer(DV ~ condition + (1 | SID) + (1 | message), data = data_mod)
# Condition effects on message-level ratings: linear mixed models with
# crossed random intercepts for participant (SID) and message; the bobyqa
# optimizer is specified to aid convergence.
motiv_self_1 = lmer(msg_motiv_self ~ condition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
motiv_other_1 = lmer(msg_motiv_other ~ condition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
rel_social_1 = lmer(msg_rel_social ~ condition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
share_1 = lmer(msg_share ~ condition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
favorability_1 = lmer(msg_favorability ~ condition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
# Condition effects on person-level outcomes: one observation per
# participant, so ordinary least squares.
cognition_1 = lm(cognition ~ condition, data = data_mod_person)
intentions_1 = lm(intentions1 ~ condition, data = data_mod_person)
norms_close_1 = lm(norms_close1 ~ condition, data = data_mod_person)
norms_town_1 = lm(norms_town1 ~ condition, data = data_mod_person)
beliefs_1 = lm(beliefs ~ condition, data = data_mod_person)## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_motiv_self ~ condition + (1 | SID) + (1 | message)
## Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 381
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.4587 -0.3877 0.1177 0.6364 2.1968
##
## Random effects:
## Groups Name Variance Std.Dev.
## SID (Intercept) 0.5036 0.7097
## message (Intercept) 0.0000 0.0000
## Residual 0.5189 0.7204
## Number of obs: 150, groups: SID, 30; message, 15
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) -0.09288 0.18902 28.00000 -0.491 0.627
## conditionencouraging 0.21433 0.28715 28.00000 0.746 0.462
##
## Correlation of Fixed Effects:
## (Intr)
## cndtnncrgng -0.658
## convergence code: 0
## boundary (singular) fit: see ?isSingular
##
## Call:
## lm(formula = intentions1 ~ condition, data = data_mod_person)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.2845 -0.5676 0.2444 0.6670 1.1476
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.2723 0.2739 0.994 0.329
## conditionmessage control -0.4806 0.3638 -1.321 0.197
##
## Residual standard error: 0.9874 on 28 degrees of freedom
## Multiple R-squared: 0.05867, Adjusted R-squared: 0.02505
## F-statistic: 1.745 on 1 and 28 DF, p-value: 0.1972
##
## Call:
## lm(formula = norms_close1 ~ condition, data = data_mod_person)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.14328 -0.48744 0.01678 0.61092 1.96115
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.08179 0.28151 0.291 0.774
## conditionmessage control -0.14434 0.37397 -0.386 0.702
##
## Residual standard error: 1.015 on 28 degrees of freedom
## Multiple R-squared: 0.005292, Adjusted R-squared: -0.03023
## F-statistic: 0.149 on 1 and 28 DF, p-value: 0.7024
##
## Call:
## lm(formula = norms_town1 ~ condition, data = data_mod_person)
##
## Residuals:
## Min 1Q Median 3Q Max
## -1.8742 -0.4444 -0.1517 0.3836 2.7403
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.3322 0.2697 1.232 0.228
## conditionmessage control -0.5862 0.3582 -1.636 0.113
##
## Residual standard error: 0.9723 on 28 degrees of freedom
## Multiple R-squared: 0.08729, Adjusted R-squared: 0.0547
## F-statistic: 2.678 on 1 and 28 DF, p-value: 0.1129
##
## Call:
## lm(formula = beliefs ~ condition, data = data_mod_person)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.4426 -0.3871 -0.1832 -0.0804 5.0448
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.1218 0.2806 -0.434 0.668
## conditionmessage control 0.2149 0.3728 0.577 0.569
##
## Residual standard error: 1.012 on 28 degrees of freedom
## Multiple R-squared: 0.01173, Adjusted R-squared: -0.02356
## F-statistic: 0.3324 on 1 and 28 DF, p-value: 0.5689
Models = lmer(DV ~ condition x cognition + (1 | SID) + (1 | message), data = data_mod)
# Condition x need-for-cognition interaction models. Message-level DVs use
# crossed random intercepts for participant (SID) and message with the
# bobyqa optimizer; person-level DVs use ordinary least squares.
motiv_self_2 = lmer(msg_motiv_self ~ condition*cognition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
motiv_other_2 = lmer(msg_motiv_other ~ condition*cognition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
rel_social_2 = lmer(msg_rel_social ~ condition*cognition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
share_2 = lmer(msg_share ~ condition*cognition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
favorability_2 = lmer(msg_favorability ~ condition*cognition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
intentions_2 = lm(intentions1 ~ condition*cognition, data = data_mod_person)
norms_close_2 = lm(norms_close1 ~ condition*cognition, data = data_mod_person)
norms_town_2 = lm(norms_town1 ~ condition*cognition, data = data_mod_person)## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## msg_motiv_self ~ condition * cognition + (1 | SID) + (1 | message)
## Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 382.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.4315 -0.3855 0.1864 0.6029 2.1915
##
## Random effects:
## Groups Name Variance Std.Dev.
## SID (Intercept) 0.5278 0.7265
## message (Intercept) 0.0000 0.0000
## Residual 0.5189 0.7204
## Number of obs: 150, groups: SID, 30; message, 15
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) -0.10914 0.20902 26.00000 -0.522
## conditionencouraging 0.34173 0.32586 26.00000 1.049
## cognition 0.04412 0.21936 26.00000 0.201
## conditionencouraging:cognition 0.18648 0.32865 26.00000 0.567
## Pr(>|t|)
## (Intercept) 0.606
## conditionencouraging 0.304
## cognition 0.842
## conditionencouraging:cognition 0.575
##
## Correlation of Fixed Effects:
## (Intr) cndtnn cogntn
## cndtnncrgng -0.641
## cognition -0.387 0.248
## cndtnncrgn: 0.258 0.104 -0.667
## convergence code: 0
## boundary (singular) fit: see ?isSingular
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## msg_motiv_other ~ condition * cognition + (1 | SID) + (1 | message)
## Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 382.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1992 -0.4261 0.1823 0.5628 1.8936
##
## Random effects:
## Groups Name Variance Std.Dev.
## SID (Intercept) 0.552251 0.74314
## message (Intercept) 0.001565 0.03956
## Residual 0.515141 0.71773
## Number of obs: 150, groups: SID, 30; message, 15
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 0.04770 0.21316 25.77347 0.224
## conditionencouraging -0.02268 0.33194 25.98862 -0.068
## cognition -0.01703 0.22347 25.99936 -0.076
## conditionencouraging:cognition 0.18173 0.33482 26.00220 0.543
## Pr(>|t|)
## (Intercept) 0.825
## conditionencouraging 0.946
## cognition 0.940
## conditionencouraging:cognition 0.592
##
## Correlation of Fixed Effects:
## (Intr) cndtnn cogntn
## cndtnncrgng -0.641
## cognition -0.386 0.248
## cndtnncrgn: 0.258 0.104 -0.668
##
## Call:
## lm(formula = intentions1 ~ condition * cognition, data = data_mod_person)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.3060 -0.5791 0.2138 0.6747 1.2173
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.26133 0.32212 0.811 0.425
## conditionmessage control -0.48706 0.41987 -1.160 0.257
## cognition -0.02284 0.31534 -0.072 0.943
## conditionmessage control:cognition 0.07023 0.42347 0.166 0.870
##
## Residual standard error: 1.024 on 26 degrees of freedom
## Multiple R-squared: 0.05988, Adjusted R-squared: -0.0486
## F-statistic: 0.552 on 3 and 26 DF, p-value: 0.6513
mod1 = lm(norm_close ~ condition, data = data_mod_person)
mod2 = lm(DV ~ norm_close + condition, data = data_mod_person)
mediation_mod = mediate(mod1, mod2, sims=1000, treat="condition", mediator="mediator")
# Mediation analyses: does the perceived social-distancing norm among close
# others (norms_close1) mediate the condition effect on each outcome?
num_sims = 1000
# The mediator model (mediator ~ treatment) is identical for every outcome,
# so fit it once instead of refitting it before each mediate() call.
mediator_mod <- lm(norms_close1 ~ condition, data = data_mod_person)
# Fit the outcome model (outcome ~ mediator + treatment) for the outcome
# column named by `outcome`, then run the causal mediation analysis.
run_norms_mediation <- function(outcome) {
  outcome_mod <- lm(reformulate(c("norms_close1", "condition"), response = outcome),
                    data = data_mod_person)
  mediate(mediator_mod, outcome_mod, sims = num_sims,
          treat = "condition", mediator = "norms_close1")
}
motiv_self_3 <- run_norms_mediation("msg_motiv_self")
motiv_other_3 <- run_norms_mediation("msg_motiv_other")
rel_social_3 <- run_norms_mediation("msg_rel_social")
share_3 <- run_norms_mediation("msg_share")
favorability_3 <- run_norms_mediation("msg_favorability")
intentions_3 <- run_norms_mediation("intentions1")##
## Causal Mediation Analysis
##
## Quasi-Bayesian Confidence Intervals
##
## Estimate 95% CI Lower 95% CI Upper p-value
## ACME -0.0452 -0.3432 0.20 0.71
## ADE -0.2254 -0.9471 0.47 0.54
## Total Effect -0.2706 -1.0075 0.49 0.50
## Prop. Mediated 0.0746 -2.1455 1.94 0.72
##
## Sample Size Used: 30
##
##
## Simulations: 1000
##
## Causal Mediation Analysis
##
## Quasi-Bayesian Confidence Intervals
##
## Estimate 95% CI Lower 95% CI Upper p-value
## ACME -0.0214 -0.2590 0.13 0.85
## ADE 0.1447 -0.6182 0.98 0.71
## Total Effect 0.1233 -0.6391 0.96 0.74
## Prop. Mediated 0.0030 -2.5993 1.94 0.96
##
## Sample Size Used: 30
##
##
## Simulations: 1000
##
## Causal Mediation Analysis
##
## Quasi-Bayesian Confidence Intervals
##
## Estimate 95% CI Lower 95% CI Upper p-value
## ACME -0.0476 -0.3694 0.23 0.71
## ADE -0.4132 -1.0933 0.23 0.25
## Total Effect -0.4608 -1.2055 0.24 0.24
## Prop. Mediated 0.1014 -1.6638 1.94 0.68
##
## Sample Size Used: 30
##
##
## Simulations: 1000
Here, we test the effect of “encouraging” and “mocking” humor versus message control, which was non-humorous. Mocking humor refers to humorous framing of health messages such that these messages ridicule individuals who choose not to follow COVID-19 related preventative measures (like social distancing), even though their circumstances allow them to follow those measures. In contrast, encouraging messages used humorous framings which did not overtly mock or ridicule any person or group of people. In this study, we used the following sets of stimuli:
In this analysis, we combine mocking-paired and mocking-unpaired into “mocking” condition, and similarly, we combine encouraging-paired and encouraging-unpaired into “encouraging” condition.
## tidy data for analysis
# Use the pilot 2 data for this analysis.
data = data_pilot2
# Message-level ratings:
# - keep the message conditions only (drop the no-message group)
# - collapse paired/unpaired variants into "encouraging" / "mocking"
# - reverse-score cognition items 2, 4, 6 via abs(6 - value)
#   (assumes a 1-5 response scale -- TODO confirm against the survey)
# - compute msg_favorability = msg_positive - msg_negative, then return to
#   long format with item names prefixed by their survey
messages = data %>%
filter(condition %in% c("message control", "encouraging-unpaired", "encouraging-paired",
"mocking-unpaired", "mocking-paired")) %>%
mutate(condition = str_replace(condition, "-paired|-unpaired", "")) %>%
filter(grepl("msg", survey_name)) %>%
mutate(value = as.numeric(value),
value = ifelse(item == "cognition_2", abs(6 - value), value),
value = ifelse(item == "cognition_4", abs(6 - value), value),
value = ifelse(item == "cognition_6", abs(6 - value), value)) %>%
# keep only the trailing item identifier (e.g. "msg_share_03" -> "03")
extract(item, "item", "msg_.*_(.*)") %>%
spread(survey_name, value) %>%
mutate(msg_favorability = msg_positive - msg_negative) %>%
dplyr::select(-msg_negative, -msg_positive) %>%
gather(survey_name, value, contains("msg")) %>%
mutate(item = sprintf("%s_%s", survey_name, item))
# Non-message outcomes and covariates (intentions, norms, beliefs, politics,
# demographics), recombined with the message-level ratings above.
data_tidy = data %>%
filter(condition %in% c("message control", "encouraging-unpaired", "encouraging-paired",
"mocking-unpaired", "mocking-paired")) %>%
mutate(condition = str_replace(condition, "-paired|-unpaired", "")) %>%
filter(grepl("cognition|intentions|norms_close|norms_town|beliefs_safe|beliefs_norms|politics_party|politics_conserv|^age$|gender", survey_name)) %>%
mutate(value = as.numeric(value)) %>%
bind_rows(messages)
# One row per participant with condition, state (factor), recoded gender, and
# mean-centered age, for use as covariates.
control_vars = data %>%
  filter(grepl("state|gender|^age$", survey_name)) %>%
  dplyr::select(condition, SID, survey_name, value) %>%
  distinct() %>%
  spread(survey_name, value) %>%
  mutate(state = as.factor(state),
         gender = recode(gender,
                         "1" = "male",
                         "2" = "female",
                         "3" = "other",
                         "4" = "prefer not to say"),
         # center age (scale = FALSE keeps the original units)
         age = scale(as.numeric(age), center = TRUE, scale = FALSE))
# Person-level means: average each participant's responses within each survey.
# ungroup() added so downstream code is not surprised by residual grouping.
data_person = data_tidy %>%
filter(grepl("msg|cognition|beliefs|intentions1_2|intentions1_4|intentions1_6|intentions1_10|norms_close1_2|norms_close1_4|norms_close1_6|norms_close1_10|norms_town1_2|norms_town1_4|norms_town1_6|norms_town1_10|politics_party|politics_conserv", item)) %>%
group_by(condition, SID, survey_name) %>%
summarize(value = mean(value, na.rm = TRUE)) %>%
ungroup()

# Sample size per condition (two chunks were fused on one line in the source).
data_person %>%
dplyr::select(SID, condition) %>%
unique() %>%
group_by(condition) %>%
summarise(n = n())
In this section, we summarize condition effects on message ratings and other subject-level DVs and covariates.
# Plot condition means with bootstrapped 95% CIs for surveys matching `survey`.
#
# Args:
#   data: long-format data with survey_name, item, value, condition columns.
#   survey: regex matched against survey_name.
#   item: if TRUE (default), one point range per item; if FALSE, one per survey.
#   palette: colors passed to scale_color_manual(); defaults to the global
#     `palette` object. (The original default `palette = palette` was
#     self-referential -- R's lazy evaluation made it error with "promise
#     already under evaluation" whenever the argument was omitted.)
#
# The two former branches differed only in the x aesthetic, so they are
# collapsed using the `.data` pronoun.
plot_cond = function(data, survey, item=TRUE, palette=NULL) {
  if (is.null(palette)) {
    # fall back to the palette defined at the top of the script
    palette = get("palette", envir = globalenv())
  }
  x_var = if (item == FALSE) "survey_name" else "item"
  data %>%
    filter(grepl(!!(survey), survey_name)) %>%
    mutate(value = as.numeric(value)) %>%
    ggplot(aes(.data[[x_var]], value, color = condition)) +
    stat_summary(fun.data = "mean_cl_boot", geom = "pointrange", position = position_dodge(width = .5)) +
    scale_color_manual(values = palette) +
    labs(x = "", y = "value\n") +
    theme_minimal() +
    theme(axis.text.x = element_text(angle = 45, hjust = 1),
          legend.position = "top")
}
A summary of condition effects on message level ratings for each of the 15 messages tested in this study.
A summary of condition effects on message ratings and other DVs/covariates at person level.
A summary of condition effects on each survey item.

###### need for cognition

A summary of condition effects on each survey, aggregated across survey items.

###### intentions, beliefs, cognition
In this section, we summarize the effect of messages on message level ratings. These summaries are presented as: 1) collapsed over conditions, and 2) split by condition.
# Compare ratings across the 15 messages, optionally split by condition.
# Dotted lines mark the grand mean of each survey.
#
# Args:
#   data: message-level data with message, survey_name, value, condition.
#   survey: regex matched against survey_name (default matches all).
#   palette: colors for scale_color_manual(); defaults to palette_cond.
#     Fix: the original accepted `palette` but ignored it and hard-coded
#     palette_cond; the argument is now honored (default preserves the old
#     behavior for callers that passed palette_cond or nothing meaningful).
#   condition: if TRUE, color point ranges by condition.
plot_compare = function(data, survey = ".*", palette = palette_cond, condition = FALSE) {
  rating_means = data %>%
    filter(grepl(!!(survey), survey_name)) %>%
    group_by(survey_name) %>%
    summarize(mean = mean(value, na.rm = TRUE)) # na.rm guards against missing ratings
  # the two former branches differed only in the color aesthetic
  mapping = if (isTRUE(condition)) {
    aes(message, value, color = condition)
  } else {
    aes(message, value)
  }
  data %>%
    filter(grepl(!!(survey), survey_name)) %>%
    ggplot(mapping) +
    stat_summary(fun.data = "mean_cl_boot") +
    coord_flip() +
    geom_hline(data = rating_means, aes(yintercept = mean), linetype = "dotted") +
    facet_grid(~survey_name) +
    labs(x = "message\n", y = "\nvalue") +
    scale_color_manual(values = palette) +
    theme_minimal() +
    theme(legend.position = "top")
}

# Message-level comparison data: drop familiarity ratings and recover the
# two-digit message id from the item name.
data_comp = messages %>%
  filter(survey_name != "msg_familiarity") %>%
  extract(item, "message", "msg_.*_([0-9]{2})", remove = FALSE)
NOTE: These plots are substantively the same as the ones presented in “visualize condition effects > message-level” sub-section. These plots are presented here for ease of comparison with the message effects collapsed across conditions.
# Person-level DVs and covariates, z-scored within survey, in wide format.
# NOTE(review): scale() returns a 1-column matrix; downstream code appears to
# tolerate this, so it is left unchanged -- confirm before modeling.
dvs_covs = data_person %>%
filter(grepl("intentions|cognition|norms|beliefs", survey_name)) %>%
group_by(survey_name) %>%
mutate(value = scale(value)) %>% #scale within survey
spread(survey_name, value)
# Trial-level message ratings (z-scored within survey) joined with the
# person-level DVs/covariates; condition releveled so "message control" is
# the reference. Used for the mixed-effects models below.
data_mod = messages %>%
group_by(SID, survey_name) %>%
extract(item, "message", "msg_.*_([0-9]{2})", remove = FALSE) %>%
mutate(trial = row_number()) %>%
dplyr::select(-item) %>%
group_by(survey_name) %>%
mutate(value = scale(value)) %>% #scale within survey
spread(survey_name, value) %>%
left_join(., dvs_covs) %>%
mutate(condition = factor(condition, levels = c("message control", "encouraging", "mocking")),
SID = as.factor(SID)) %>%
ungroup() %>%
dplyr::select(-msg_familiarity)
# Person-level message ratings: mean across messages per person, z-scored
# across people, joined with person-level DVs/covariates.
# Fix: the original group_by listed survey_name twice.
data_mod_person = messages %>%
group_by(SID, survey_name) %>%
extract(item, "message", "msg_.*_([0-9]{2})", remove = FALSE) %>%
mutate(trial = row_number()) %>%
dplyr::select(-item) %>%
group_by(study, group, condition, SID, survey_name) %>%
summarize(value = mean(value, na.rm = TRUE)) %>% # take the mean across messages
group_by(survey_name) %>%
mutate(value = scale(value)) %>% # scale across people
spread(survey_name, value) %>%
left_join(., dvs_covs)
Models = lmer(DV ~ condition + (1 | SID) + (1 | message), data = data_mod)
# Condition effects on trial-level message ratings: linear mixed models with
# crossed random intercepts for participant (SID) and message. The bobyqa
# optimizer is used throughout (presumably to avoid convergence warnings --
# TODO confirm).
motiv_self_1 = lmer(msg_motiv_self ~ condition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
motiv_other_1 = lmer(msg_motiv_other ~ condition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
rel_social_1 = lmer(msg_rel_social ~ condition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
share_1 = lmer(msg_share ~ condition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
favorability_1 = lmer(msg_favorability ~ condition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
# Condition effects on person-level outcomes: one observation per participant,
# so ordinary linear models suffice.
cognition_1 = lm(cognition ~ condition, data = data_mod_person)
intentions_1 = lm(intentions1 ~ condition, data = data_mod_person)
norms_close_1 = lm(norms_close1 ~ condition, data = data_mod_person)
norms_town_1 = lm(norms_town1 ~ condition, data = data_mod_person)
beliefs_safe_others_1 = lm(beliefs_safe_others ~ condition, data = data_mod_person)
beliefs_safe_self_1 = lm(beliefs_safe_self ~ condition, data = data_mod_person)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: msg_motiv_self ~ condition + (1 | SID) + (1 | message)
## Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 929.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.5320 -0.3285 0.1324 0.3684 2.8713
##
## Random effects:
## Groups Name Variance Std.Dev.
## SID (Intercept) 0.4609632 0.67894
## message (Intercept) 0.0001903 0.01379
## Residual 0.5126932 0.71603
## Number of obs: 369, groups: SID, 74; message, 15
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.1759 0.1372 70.0300 1.282 0.2041
## conditionencouraging -0.1304 0.2168 70.2302 -0.602 0.5494
## conditionmocking -0.4119 0.2057 70.0662 -2.002 0.0491 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) cndtnn
## cndtnncrgng -0.632
## condtnmckng -0.667 0.422
##
## Call:
## lm(formula = intentions1 ~ condition, data = data_mod_person)
##
## Residuals:
## Min 1Q Median 3Q Max
## -3.1541 -0.3342 0.3655 0.7311 0.9399
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.07621 0.22575 -0.338 0.737
## conditionmessage control 0.02089 0.29145 0.072 0.943
## conditionmocking 0.20888 0.30567 0.683 0.497
##
## Residual standard error: 1.01 on 71 degrees of freedom
## Multiple R-squared: 0.008636, Adjusted R-squared: -0.01929
## F-statistic: 0.3092 on 2 and 71 DF, p-value: 0.735
##
## Call:
## lm(formula = norms_close1 ~ condition, data = data_mod_person)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.8502 -0.5378 0.2473 0.6539 1.3002
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.4682 0.2143 -2.185 0.03221 *
## conditionmessage control 0.4793 0.2767 1.732 0.08755 .
## conditionmocking 0.8445 0.2902 2.910 0.00482 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.9584 on 71 degrees of freedom
## Multiple R-squared: 0.1067, Adjusted R-squared: 0.08149
## F-statistic: 4.238 on 2 and 71 DF, p-value: 0.01824
##
## Call:
## lm(formula = norms_town1 ~ condition, data = data_mod_person)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.49909 -0.44123 0.00739 0.69101 1.83882
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.4389 0.2135 -2.056 0.0435 *
## conditionmessage control 0.3906 0.2756 1.417 0.1608
## conditionmocking 0.8650 0.2891 2.993 0.0038 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.9547 on 71 degrees of freedom
## Multiple R-squared: 0.1134, Adjusted R-squared: 0.08847
## F-statistic: 4.542 on 2 and 71 DF, p-value: 0.01392
Models = lmer(DV ~ condition * cognition + (1 | SID) + (1 | message), data = data_mod)
# Moderation by need for cognition: the same models as above with a
# condition-by-cognition interaction term.
motiv_self_2 = lmer(msg_motiv_self ~ condition*cognition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
motiv_other_2 = lmer(msg_motiv_other ~ condition*cognition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
rel_social_2 = lmer(msg_rel_social ~ condition*cognition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
share_2 = lmer(msg_share ~ condition*cognition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
favorability_2 = lmer(msg_favorability ~ condition*cognition + (1 | SID) + (1 | message),
data = data_mod,
control = lmerControl(optimizer = "bobyqa"))
# Person-level outcomes with the interaction, one observation per participant.
intentions_2 = lm(intentions1 ~ condition*cognition, data = data_mod_person)
norms_close_2 = lm(norms_close1 ~ condition*cognition, data = data_mod_person)
norms_town_2 = lm(norms_town1 ~ condition*cognition, data = data_mod_person)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## msg_motiv_self ~ condition * cognition + (1 | SID) + (1 | message)
## Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 934.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.5547 -0.3165 0.1174 0.3692 2.8569
##
## Random effects:
## Groups Name Variance Std.Dev.
## SID (Intercept) 0.4815259 0.6939
## message (Intercept) 0.0001664 0.0129
## Residual 0.5127043 0.7160
## Number of obs: 369, groups: SID, 74; message, 15
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 0.17615 0.13967 67.16753 1.261
## conditionencouraging -0.13031 0.22081 67.33454 -0.590
## conditionmocking -0.41153 0.20942 67.19324 -1.965
## cognition -0.04103 0.12904 67.98831 -0.318
## conditionencouraging:cognition 0.05924 0.28203 67.89793 0.210
## conditionmocking:cognition -0.04964 0.19256 67.92560 -0.258
## Pr(>|t|)
## (Intercept) 0.2116
## conditionencouraging 0.5571
## conditionmocking 0.0535 .
## cognition 0.7515
## conditionencouraging:cognition 0.8343
## conditionmocking:cognition 0.7973
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) cndtnn cndtnm cogntn cndtnn:
## cndtnncrgng -0.632
## condtnmckng -0.667 0.422
## cognition -0.008 0.005 0.005
## cndtnncrgn: 0.003 0.020 -0.002 -0.458
## cndtnmckng: 0.005 -0.003 -0.007 -0.670 0.307
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## msg_motiv_other ~ condition * cognition + (1 | SID) + (1 | message)
## Data: data_mod
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 945.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1917 -0.3495 0.0940 0.4510 3.0505
##
## Random effects:
## Groups Name Variance Std.Dev.
## SID (Intercept) 0.490205 0.70015
## message (Intercept) 0.009629 0.09813
## Residual 0.522938 0.72314
## Number of obs: 369, groups: SID, 74; message, 15
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 0.170068 0.143233 67.676649 1.187
## conditionencouraging -0.131674 0.225491 69.008253 -0.584
## conditionmocking -0.383273 0.214147 68.922520 -1.790
## cognition 0.027697 0.130310 68.009492 0.213
## conditionencouraging:cognition 0.120972 0.284652 67.789525 0.425
## conditionmocking:cognition 0.008489 0.194376 67.856866 0.044
## Pr(>|t|)
## (Intercept) 0.2392
## conditionencouraging 0.5612
## conditionmocking 0.0779 .
## cognition 0.8323
## conditionencouraging:cognition 0.6722
## conditionmocking:cognition 0.9653
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) cndtnn cndtnm cogntn cndtnn:
## cndtnncrgng -0.617
## condtnmckng -0.649 0.436
## cognition -0.007 0.001 0.001
## cndtnncrgn: 0.003 0.022 -0.001 -0.458
## cndtnmckng: 0.005 -0.001 -0.004 -0.670 0.307
##
## Call:
## lm(formula = intentions1 ~ condition * cognition, data = data_mod_person)
##
## Residuals:
## Min 1Q Median 3Q Max
## -3.1470 -0.4534 0.3932 0.7426 1.1121
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.07243 0.22955 -0.316 0.753
## conditionmessage control 0.01704 0.29629 0.058 0.954
## conditionmocking 0.20600 0.31074 0.663 0.510
## cognition 0.17040 0.33669 0.506 0.614
## conditionmessage control:cognition -0.16363 0.37861 -0.432 0.667
## conditionmocking:cognition -0.30414 0.38752 -0.785 0.435
##
## Residual standard error: 1.026 on 68 degrees of freedom
## Multiple R-squared: 0.01936, Adjusted R-squared: -0.05275
## F-statistic: 0.2685 on 5 and 68 DF, p-value: 0.9288
mod1 = lm(norms_close1 ~ condition, data = data_mod_person)
mod2 = lm(DV ~ norms_close1 + condition, data = data_mod_person)
mediation_mod = mediate(mod1, mod2, sims = 1000, treat = "condition", mediator = "norms_close1")
num_sims <- 1000

# Estimate mediation of condition effects through perceived social-distancing
# norms of close others (norms_close1), for the combined-condition analysis.
#
# For a given outcome formula of the form `outcome ~ norms_close1 + condition`,
# fits (1) the mediator model (norms_close1 ~ condition) and (2) the outcome
# model, then estimates the indirect effect with mediation::mediate().
# Returns the `mediate` object. Replaces six copy-pasted mod1/mod2 pairs.
# NOTE(review): mediate() draws quasi-Bayesian simulations; consider
# set.seed() before this chunk for reproducible CIs.
run_norms_mediation <- function(outcome_formula) {
  mediator_mod <- lm(norms_close1 ~ condition, data = data_mod_person)
  outcome_mod <- lm(outcome_formula, data = data_mod_person)
  mediate(mediator_mod, outcome_mod, sims = num_sims,
          treat = "condition", mediator = "norms_close1")
}

motiv_self_3 <- run_norms_mediation(msg_motiv_self ~ norms_close1 + condition)
motiv_other_3 <- run_norms_mediation(msg_motiv_other ~ norms_close1 + condition)
rel_social_3 <- run_norms_mediation(msg_rel_social ~ norms_close1 + condition)
share_3 <- run_norms_mediation(msg_share ~ norms_close1 + condition)
favorability_3 <- run_norms_mediation(msg_favorability ~ norms_close1 + condition)
intentions_3 <- run_norms_mediation(intentions1 ~ norms_close1 + condition)
##
## Causal Mediation Analysis
##
## Quasi-Bayesian Confidence Intervals
##
## Estimate 95% CI Lower 95% CI Upper p-value
## ACME 0.0518 -0.0652 0.22 0.39
## ADE 0.1125 -0.4701 0.69 0.69
## Total Effect 0.1643 -0.4477 0.72 0.58
## Prop. Mediated 0.0784 -2.0272 3.05 0.71
##
## Sample Size Used: 74
##
##
## Simulations: 1000
##
## Causal Mediation Analysis
##
## Quasi-Bayesian Confidence Intervals
##
## Estimate 95% CI Lower 95% CI Upper p-value
## ACME 0.0468 -0.0879 0.21 0.47
## ADE 0.1394 -0.4625 0.71 0.66
## Total Effect 0.1862 -0.4026 0.74 0.52
## Prop. Mediated 0.0632 -4.6489 2.77 0.75
##
## Sample Size Used: 74
##
##
## Simulations: 1000
##
## Causal Mediation Analysis
##
## Quasi-Bayesian Confidence Intervals
##
## Estimate 95% CI Lower 95% CI Upper p-value
## ACME 0.27445 -0.01965 0.63 0.078 .
## ADE -0.26906 -0.78128 0.21 0.302
## Total Effect 0.00539 -0.59536 0.59 0.978
## Prop. Mediated 0.37773 -12.60033 19.44 0.908
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Sample Size Used: 74
##
##
## Simulations: 1000
social motivation